From 4890023a140575f9266c8de5c91671c8b77c8883 Mon Sep 17 00:00:00 2001 From: "mafetter@fleming.research" Date: Tue, 5 Apr 2005 07:38:03 +0000 Subject: [PATCH] bitkeeper revision 1.1268 (4252405bviDCnobrL9rMLhSPvqKOKw) Manual cleanup after merge Signed-off-by: michael.fetterman@cl.cam.ac.uk --- xen/arch/x86/audit.c | 2 +- xen/arch/x86/mm.c | 45 ++++++++++++++++++++++++++++-------- xen/arch/x86/shadow.c | 8 ++++--- xen/include/asm-x86/mm.h | 3 ++- xen/include/asm-x86/shadow.h | 16 ++++++------- 5 files changed, 51 insertions(+), 23 deletions(-) diff --git a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c index 6d8b2f48a9..5faca40f5f 100644 --- a/xen/arch/x86/audit.c +++ b/xen/arch/x86/audit.c @@ -683,7 +683,7 @@ void _audit_domain(struct domain *d, int flags) if ( d != current->domain ) domain_pause(d); - synchronise_pagetables(~0UL); + sync_lazy_execstate_all(); // Maybe we should just be using BIGLOCK? // diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index cf49117025..9ee905cea9 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -199,8 +199,7 @@ void write_ptbase(struct exec_domain *ed) write_cr3(pagetable_val(ed->arch.monitor_table)); } - -static inline void invalidate_shadow_ldt(struct exec_domain *d) +void invalidate_shadow_ldt(struct exec_domain *d) { int i; unsigned long pfn; @@ -1306,6 +1305,7 @@ int new_guest_cr3(unsigned long mfn) static void process_deferred_ops(unsigned int cpu) { unsigned int deferred_ops; + struct domain *d = current->domain; deferred_ops = percpu_info[cpu].deferred_ops; percpu_info[cpu].deferred_ops = 0; @@ -1462,6 +1462,9 @@ int do_mmuext_op( type = PGT_l1_page_table | PGT_va_mutable; pin_page: + if ( shadow_mode_enabled(FOREIGNDOM) ) + type = PGT_writable_page; + okay = get_page_and_type_from_pagenr(op.mfn, type, FOREIGNDOM); if ( unlikely(!okay) ) { @@ -1516,6 +1519,7 @@ int do_mmuext_op( case MMUEXT_NEW_BASEPTR: okay = new_guest_cr3(op.mfn); + percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB; break; #ifdef __x86_64__ @@ 
-1542,6 +1546,8 @@ int do_mmuext_op( break; case MMUEXT_INVLPG_LOCAL: + if ( shadow_mode_enabled(d) ) + shadow_invlpg(ed, op.linear_addr); local_flush_tlb_one(op.linear_addr); break; @@ -1556,17 +1562,25 @@ int do_mmuext_op( } pset = vcpuset_to_pcpuset(d, vset); if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI ) + { + BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu))); flush_tlb_mask(pset & d->cpuset); + } else + { + BUG_ON(shadow_mode_enabled(d) && ((pset & d->cpuset) != (1<<cpu))); flush_tlb_one_mask(pset & d->cpuset, op.linear_addr); + } break; } case MMUEXT_TLB_FLUSH_ALL: + BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu))); flush_tlb_mask(d->cpuset); break; case MMUEXT_INVLPG_ALL: + BUG_ON(shadow_mode_enabled(d) && (d->cpuset != (1<<cpu))); flush_tlb_one_mask(d->cpuset, op.linear_addr); break; @@ -1584,6 +1598,15 @@ int do_mmuext_op( case MMUEXT_SET_LDT: { + if ( shadow_mode_external(d) ) + { + // ignore this request from an external domain... + MEM_LOG("ignoring SET_LDT hypercall from external " "domain %u\n", d->id); + okay = 0; + break; + } + unsigned long ptr = op.linear_addr; unsigned long ents = op.nr_ents; if ( ((ptr & (PAGE_SIZE-1)) != 0) || @@ -1732,7 +1755,7 @@ int do_mmu_update( unsigned int foreigndom) { mmu_update_t req; - unsigned long va = 0, pfn, prev_pfn = 0; + unsigned long va = 0, mfn, prev_mfn = 0, gpfn; struct pfn_info *page; int rc = 0, okay = 1, i = 0, cpu = smp_processor_id(); unsigned int cmd, done = 0; @@ -1747,9 +1770,6 @@ int do_mmu_update( if ( unlikely(shadow_mode_enabled(d)) ) check_pagetable(ed, "pre-mmu"); /* debug */ - if ( unlikely(shadow_mode_translate(d)) ) - domain_crash_synchronous(); - if ( unlikely(count & MMU_UPDATE_PREEMPTED) ) { count &= ~MMU_UPDATE_PREEMPTED; @@ -1875,7 +1895,8 @@ int do_mmu_update( __mark_dirty(d, mfn); gpfn = __mfn_to_gpfn(d, mfn); - ASSERT(gpfn); + ASSERT(VALID_M2P(gpfn)); + if ( page_is_page_table(page) ) shadow_mark_mfn_out_of_sync(ed, gpfn, mfn); } @@ -2012,7 +2033,10 @@ int update_shadow_va_mapping(unsigned long va, if ( unlikely(__put_user(val, &l1_pgentry_val( linear_pg_table[l1_linear_offset(va)]))) ) - 
return -EINVAL; + { + rc = -EINVAL; + goto out; + } // also need to update the shadow @@ -2027,6 +2051,7 @@ int update_shadow_va_mapping(unsigned long va, if ( shadow_mode_log_dirty(d) ) mark_dirty(d, va_to_l1mfn(ed, va)); + out: shadow_unlock(d); check_pagetable(ed, "post-va"); /* debug */ @@ -2658,8 +2683,8 @@ int ptwr_do_page_fault(unsigned long addr) u32 l2_idx; struct exec_domain *ed = current; - // not supported in combination with various shadow modes! - ASSERT( !shadow_mode_enabled(ed->domain) ); + if ( unlikely(shadow_mode_enabled(ed->domain)) ) + return 0; /* * Attempt to read the PTE that maps the VA being accessed. By checking for diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c index 9db6c7616d..d0adbdba45 100644 --- a/xen/arch/x86/shadow.c +++ b/xen/arch/x86/shadow.c @@ -590,10 +590,10 @@ static void alloc_monitor_pagetable(struct exec_domain *ed) struct pfn_info *mmfn_info; struct domain *d = ed->domain; - ASSERT(!pagetable_val(ed->arch.monitor_table)); /* we should only get called once */ + ASSERT(pagetable_val(ed->arch.monitor_table) == 0); mmfn_info = alloc_domheap_page(NULL); - ASSERT( mmfn_info ); + ASSERT(mmfn_info != NULL); mmfn = (unsigned long) (mmfn_info - frame_table); mpl2e = (l2_pgentry_t *) map_domain_mem(mmfn << PAGE_SHIFT); @@ -2756,7 +2756,7 @@ int _check_pagetable(struct exec_domain *ed, char *s) shadow_lock(d); sh_check_name = s; - SH_VVLOG("%s-PT Audit", s); + //SH_VVLOG("%s-PT Audit", s); sh_l2_present = sh_l1_present = 0; perfc_incrc(check_pagetable); @@ -2802,8 +2802,10 @@ int _check_pagetable(struct exec_domain *ed, char *s) unmap_domain_mem(spl2e); unmap_domain_mem(gpl2e); +#if 0 SH_VVLOG("PT verified : l2_present = %d, l1_present = %d", sh_l2_present, sh_l1_present); +#endif out: if ( errors ) diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h index 9bc85511fa..5d0bb6c030 100644 --- a/xen/include/asm-x86/mm.h +++ b/xen/include/asm-x86/mm.h @@ -246,7 +246,8 @@ int check_descriptor(struct desc_struct 
*d); #undef machine_to_phys_mapping #define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START) #define INVALID_M2P_ENTRY (~0U) -#define IS_INVALID_M2P_ENTRY(_e) (!!((_e) & (1U<<31))) +#define VALID_M2P(_e) (!((_e) & (1U<<31))) +#define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e)) /* * The phys_to_machine_mapping is the reversed mapping of MPT for full diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h index e5916536eb..4aa84b1d17 100644 --- a/xen/include/asm-x86/shadow.h +++ b/xen/include/asm-x86/shadow.h @@ -222,11 +222,11 @@ struct out_of_sync_entry { #define SHADOW_SNAPSHOT_ELSEWHERE (-1L) /************************************************************************/ -#define SHADOW_DEBUG 0 -#define SHADOW_VERBOSE_DEBUG 0 -#define SHADOW_VVERBOSE_DEBUG 0 -#define SHADOW_HASH_DEBUG 0 -#define FULLSHADOW_DEBUG 0 +#define SHADOW_DEBUG 1 +#define SHADOW_VERBOSE_DEBUG 1 +#define SHADOW_VVERBOSE_DEBUG 1 +#define SHADOW_HASH_DEBUG 1 +#define FULLSHADOW_DEBUG 1 #if SHADOW_DEBUG extern int shadow_status_noswap; @@ -373,7 +373,7 @@ update_hl2e(struct exec_domain *ed, unsigned long va) if ( need_flush ) { perfc_incrc(update_hl2e_invlpg); - __flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]); + local_flush_tlb_one(&linear_pg_table[l1_linear_offset(va)]); } } @@ -959,7 +959,7 @@ static inline unsigned long ___shadow_status( perfc_incrc(shadow_status_hit_head); } - SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn); + //SH_VVLOG("lookup gpfn=%p => status=%p", key, head->smfn); return head->smfn; } @@ -968,7 +968,7 @@ static inline unsigned long ___shadow_status( } while ( x != NULL ); - SH_VVLOG("lookup gpfn=%p => status=0", key); + //SH_VVLOG("lookup gpfn=%p => status=0", key); perfc_incrc(shadow_status_miss); return 0; } -- 2.30.2